In [1]:
import os
import pickle

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.keras.backend import clear_session
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv3D, MaxPooling1D, Conv1D
from tensorflow.keras.models import Sequential

from ipynb.fs.full.functions import *
In [2]:
path = os.getcwd()
In [3]:
# Load the measurements as a dict mapping sensor name -> readings.
# - 12 measurements per person
# - 13 participants
# - 156 (12 * 13) samples per sensor
# - 6 sensors and their respective outputs (note: given how the data is
#   organized, the output is the same for every sensor)
# NOTE(review): pickle.load runs arbitrary code — only load trusted files.
# os.path.join is used instead of string concatenation for portability.
with open(os.path.join(path, 'rede', 'pickles_data', 'all_data_dict.pkl'), 'rb') as all_data:
    all_data_dict = pickle.load(all_data)
In [4]:
# Unpack the per-sensor series and their matching outputs from the dict.
# Key names follow the pattern 'dados_sensores_<axis>' / 'lista_pessoas_output_<axis>'.
_eixos = ('accx', 'accy', 'accz', 'gyrox', 'gyroy', 'gyroz')
(dados_sensores_accx, dados_sensores_accy, dados_sensores_accz,
 dados_sensores_gyrox, dados_sensores_gyroy, dados_sensores_gyroz) = (
    all_data_dict['dados_sensores_' + eixo] for eixo in _eixos)
(lista_pessoas_output_accx, lista_pessoas_output_accy, lista_pessoas_output_accz,
 lista_pessoas_output_gyrox, lista_pessoas_output_gyroy, lista_pessoas_output_gyroz) = (
    all_data_dict['lista_pessoas_output_' + eixo] for eixo in _eixos)

Parâmetros

In [5]:
dicionario_pessoas = enumera_dicionario(['Amanda','Anderson','Carlos','Dias','Gustavo','Henrique','Ian','Jessica','Joao','Junior','Moura','Patrick','Yuri'])
In [6]:
pessoa = 5 # reference person (index into dicionario_pessoas)
janela = 400 # window size: number of samples per vector
passo = 60 # step: number of samples between consecutive windows
In [7]:
# Build the network inputs from all six sensor series for the reference
# person (project helper; exact window/label semantics are defined in
# ipynb.fs.full.functions — TODO confirm there).
dados_modelo, output_modelo = elabora_input_rede([dados_sensores_accx, dados_sensores_accy, dados_sensores_accz,
                                                  dados_sensores_gyrox, dados_sensores_gyroy, dados_sensores_gyroz],
                                                  lista_pessoas_output_accx, pessoa)
In [8]:
# Split each sensor's samples into train (2/3) and test (1/3) sets, then
# expand them into overlapping windows of `janela` samples with stride
# `passo` via the project helper `multiplica_dados`.
# The magic index lists are hoisted into named constants: out of each group
# of 12 measurements (0-11 and 12-23 — presumably reference person vs.
# others, TODO confirm against elabora_input_rede), 8 go to train, 4 to test.
indices_treino = [0, 1, 2, 3, 4, 5, 6, 7, 12, 13, 14, 15, 16, 17, 18, 19]
indices_teste = [8, 9, 10, 11, 20, 21, 22, 23]
dados_multiplicados_treino = dict()
dados_multiplicados_teste = dict()
for i in dados_modelo:
    # multiplica_dados returns (windowed X, windowed y) for one sensor.
    dados_multiplicados_treino[i] = multiplica_dados(dados_modelo[i][indices_treino], janela, passo,
                                                     output_modelo[i][indices_treino])
    dados_multiplicados_teste[i] = multiplica_dados(dados_modelo[i][indices_teste], janela, passo,
                                                    output_modelo[i][indices_teste])
In [9]:
# Stack each sensor's windows along a channel axis, producing 3-D arrays
# of shape (samples, janela, channels). Sensors 0-2 are the accelerometer
# axes and 3-5 the gyroscope axes.
canais_treino = [dados_multiplicados_treino[k][0] for k in range(6)]
canais_teste = [dados_multiplicados_teste[k][0] for k in range(6)]

dados_acelerometro_treino = np.dstack(canais_treino[:3])
dados_giroscopio_treino = np.dstack(canais_treino[3:])
dados_acelerometro_giroscopio_treino = np.dstack(canais_treino)

dados_acelerometro_teste = np.dstack(canais_teste[:3])
dados_giroscopio_teste = np.dstack(canais_teste[3:])
dados_acelerometro_giroscopio_teste = np.dstack(canais_teste)

# The output labels are identical for every sensor, so take sensor 0's.
dados_rede_y_treino = dados_multiplicados_treino[0][1]
dados_rede_y_teste = dados_multiplicados_teste[0][1]

REDE LSTM

In [10]:
# Accelerometer-only experiment; copies keep the stacked arrays pristine
# for the later FFT sections.
X_train = dados_acelerometro_treino.copy()
y_train = dados_rede_y_treino.copy()
X_val = dados_acelerometro_teste.copy()
y_val = dados_rede_y_teste.copy()
In [11]:
# LSTM binary classifier: does the window belong to the reference person?
# Input shape is (timesteps=janela, channels=3 accelerometer axes).
model = Sequential()
model.add(LSTM(40, input_shape=(X_train.shape[1], X_train.shape[2])))
model.add(Dropout(0.2))
model.add(Dense(3000, activation='relu'))
model.add(Dense(3000, activation='relu'))
model.add(Dense(300, activation='relu'))
# Single sigmoid unit paired with binary cross-entropy for 2-class output.
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# Fix: validation_data is documented as a tuple, not a list — this also
# matches the later fit calls in this notebook.
model.fit(X_train, y_train, validation_data=(X_val, y_val), epochs=50, batch_size=1024, verbose=True, shuffle=True)
Train on 256 samples, validate on 128 samples
Epoch 1/50
256/256 [==============================] - 3s 11ms/sample - loss: 0.6929 - accuracy: 0.4883 - val_loss: 0.6763 - val_accuracy: 0.5938
Epoch 2/50
256/256 [==============================] - 0s 176us/sample - loss: 0.6663 - accuracy: 0.6523 - val_loss: 0.6604 - val_accuracy: 0.5938
Epoch 3/50
256/256 [==============================] - 0s 168us/sample - loss: 0.6295 - accuracy: 0.6406 - val_loss: 0.6701 - val_accuracy: 0.5859
Epoch 4/50
256/256 [==============================] - 0s 179us/sample - loss: 0.6204 - accuracy: 0.6836 - val_loss: 0.6745 - val_accuracy: 0.6016
Epoch 5/50
256/256 [==============================] - 0s 159us/sample - loss: 0.6146 - accuracy: 0.6602 - val_loss: 0.6535 - val_accuracy: 0.6094
Epoch 6/50
256/256 [==============================] - 0s 173us/sample - loss: 0.6042 - accuracy: 0.6758 - val_loss: 0.6719 - val_accuracy: 0.5938
Epoch 7/50
256/256 [==============================] - 0s 172us/sample - loss: 0.6253 - accuracy: 0.6797 - val_loss: 0.6319 - val_accuracy: 0.6328
Epoch 8/50
256/256 [==============================] - 0s 171us/sample - loss: 0.5991 - accuracy: 0.6680 - val_loss: 0.6369 - val_accuracy: 0.6328
Epoch 9/50
256/256 [==============================] - 0s 165us/sample - loss: 0.6133 - accuracy: 0.6758 - val_loss: 0.6278 - val_accuracy: 0.6016
Epoch 10/50
256/256 [==============================] - 0s 172us/sample - loss: 0.5837 - accuracy: 0.6914 - val_loss: 0.6424 - val_accuracy: 0.6328
Epoch 11/50
256/256 [==============================] - 0s 174us/sample - loss: 0.5959 - accuracy: 0.6797 - val_loss: 0.6284 - val_accuracy: 0.5859
Epoch 12/50
256/256 [==============================] - 0s 163us/sample - loss: 0.5688 - accuracy: 0.6875 - val_loss: 0.6265 - val_accuracy: 0.6172
Epoch 13/50
256/256 [==============================] - 0s 173us/sample - loss: 0.5905 - accuracy: 0.6875 - val_loss: 0.6254 - val_accuracy: 0.6250
Epoch 14/50
256/256 [==============================] - 0s 172us/sample - loss: 0.5754 - accuracy: 0.7227 - val_loss: 0.6211 - val_accuracy: 0.5938
Epoch 15/50
256/256 [==============================] - 0s 157us/sample - loss: 0.5695 - accuracy: 0.6914 - val_loss: 0.6272 - val_accuracy: 0.6250
Epoch 16/50
256/256 [==============================] - 0s 173us/sample - loss: 0.5608 - accuracy: 0.6992 - val_loss: 0.6172 - val_accuracy: 0.6172
Epoch 17/50
256/256 [==============================] - 0s 165us/sample - loss: 0.5558 - accuracy: 0.7070 - val_loss: 0.6028 - val_accuracy: 0.6641
Epoch 18/50
256/256 [==============================] - 0s 174us/sample - loss: 0.5412 - accuracy: 0.7266 - val_loss: 0.6003 - val_accuracy: 0.6719
Epoch 19/50
256/256 [==============================] - 0s 161us/sample - loss: 0.5433 - accuracy: 0.7109 - val_loss: 0.6037 - val_accuracy: 0.6641
Epoch 20/50
256/256 [==============================] - 0s 170us/sample - loss: 0.5343 - accuracy: 0.7227 - val_loss: 0.6185 - val_accuracy: 0.6719
Epoch 21/50
256/256 [==============================] - 0s 165us/sample - loss: 0.5485 - accuracy: 0.7031 - val_loss: 0.6049 - val_accuracy: 0.6562
Epoch 22/50
256/256 [==============================] - 0s 173us/sample - loss: 0.5330 - accuracy: 0.6836 - val_loss: 0.5999 - val_accuracy: 0.6719
Epoch 23/50
256/256 [==============================] - 0s 169us/sample - loss: 0.5291 - accuracy: 0.7383 - val_loss: 0.6111 - val_accuracy: 0.6797
Epoch 24/50
256/256 [==============================] - 0s 161us/sample - loss: 0.5186 - accuracy: 0.7305 - val_loss: 0.6219 - val_accuracy: 0.6250
Epoch 25/50
256/256 [==============================] - 0s 165us/sample - loss: 0.5325 - accuracy: 0.7148 - val_loss: 0.6059 - val_accuracy: 0.6719
Epoch 26/50
256/256 [==============================] - 0s 156us/sample - loss: 0.5245 - accuracy: 0.7305 - val_loss: 0.6027 - val_accuracy: 0.6641
Epoch 27/50
256/256 [==============================] - 0s 166us/sample - loss: 0.5293 - accuracy: 0.7109 - val_loss: 0.6102 - val_accuracy: 0.6641
Epoch 28/50
256/256 [==============================] - 0s 164us/sample - loss: 0.5172 - accuracy: 0.7344 - val_loss: 0.6130 - val_accuracy: 0.6719
Epoch 29/50
256/256 [==============================] - 0s 174us/sample - loss: 0.5221 - accuracy: 0.7305 - val_loss: 0.6090 - val_accuracy: 0.6562
Epoch 30/50
256/256 [==============================] - 0s 174us/sample - loss: 0.5112 - accuracy: 0.7422 - val_loss: 0.6184 - val_accuracy: 0.6562
Epoch 31/50
256/256 [==============================] - 0s 164us/sample - loss: 0.4997 - accuracy: 0.7383 - val_loss: 0.6198 - val_accuracy: 0.6641
Epoch 32/50
256/256 [==============================] - 0s 163us/sample - loss: 0.4902 - accuracy: 0.7383 - val_loss: 0.6157 - val_accuracy: 0.6719
Epoch 33/50
256/256 [==============================] - 0s 171us/sample - loss: 0.5003 - accuracy: 0.7422 - val_loss: 0.6121 - val_accuracy: 0.6797
Epoch 34/50
256/256 [==============================] - 0s 167us/sample - loss: 0.4899 - accuracy: 0.7539 - val_loss: 0.6112 - val_accuracy: 0.6797
Epoch 35/50
256/256 [==============================] - 0s 174us/sample - loss: 0.4950 - accuracy: 0.7461 - val_loss: 0.6100 - val_accuracy: 0.6797
Epoch 36/50
256/256 [==============================] - 0s 155us/sample - loss: 0.4771 - accuracy: 0.7812 - val_loss: 0.6066 - val_accuracy: 0.6953
Epoch 37/50
256/256 [==============================] - 0s 165us/sample - loss: 0.4754 - accuracy: 0.7578 - val_loss: 0.6141 - val_accuracy: 0.6797
Epoch 38/50
256/256 [==============================] - 0s 170us/sample - loss: 0.4579 - accuracy: 0.7500 - val_loss: 0.6311 - val_accuracy: 0.6641
Epoch 39/50
256/256 [==============================] - 0s 166us/sample - loss: 0.4721 - accuracy: 0.7539 - val_loss: 0.6145 - val_accuracy: 0.7031
Epoch 40/50
256/256 [==============================] - 0s 169us/sample - loss: 0.4535 - accuracy: 0.7461 - val_loss: 0.6264 - val_accuracy: 0.6953
Epoch 41/50
256/256 [==============================] - 0s 161us/sample - loss: 0.4542 - accuracy: 0.7695 - val_loss: 0.6252 - val_accuracy: 0.6875
Epoch 42/50
256/256 [==============================] - 0s 173us/sample - loss: 0.4411 - accuracy: 0.7656 - val_loss: 0.5989 - val_accuracy: 0.6875
Epoch 43/50
256/256 [==============================] - 0s 166us/sample - loss: 0.4496 - accuracy: 0.7852 - val_loss: 0.6039 - val_accuracy: 0.7031
Epoch 44/50
256/256 [==============================] - 0s 154us/sample - loss: 0.4256 - accuracy: 0.7539 - val_loss: 0.6333 - val_accuracy: 0.6875
Epoch 45/50
256/256 [==============================] - 0s 163us/sample - loss: 0.4185 - accuracy: 0.8008 - val_loss: 0.6300 - val_accuracy: 0.6953
Epoch 46/50
256/256 [==============================] - 0s 166us/sample - loss: 0.4238 - accuracy: 0.7812 - val_loss: 0.5980 - val_accuracy: 0.7031
Epoch 47/50
256/256 [==============================] - 0s 173us/sample - loss: 0.3909 - accuracy: 0.7969 - val_loss: 0.6014 - val_accuracy: 0.7188
Epoch 48/50
256/256 [==============================] - 0s 163us/sample - loss: 0.3817 - accuracy: 0.7773 - val_loss: 0.6406 - val_accuracy: 0.6875
Epoch 49/50
256/256 [==============================] - 0s 168us/sample - loss: 0.3706 - accuracy: 0.8047 - val_loss: 0.6296 - val_accuracy: 0.7188
Epoch 50/50
256/256 [==============================] - 0s 170us/sample - loss: 0.3505 - accuracy: 0.8203 - val_loss: 0.7073 - val_accuracy: 0.6875
Out[11]:
<tensorflow.python.keras.callbacks.History at 0x7f43541cc750>

FFT

In [12]:
ok, errado = valida_dados(dados_sensores_accx)
In [13]:
# Accelerometer
# Train
# NOTE(review): train_xAcc/train_yAcc/valY are defined here but the fit
# calls below reuse X_train/y_train/y_val from the LSTM section (same
# underlying values) — confirm which naming was intended and drop one set.
train_xAcc = dados_acelerometro_treino.copy()
train_yAcc = dados_rede_y_treino.copy()

# Validation
val_xAcc = dados_acelerometro_teste.copy()
valY = dados_rede_y_teste.copy()
In [14]:
# Collapse the three accelerometer axes into a single magnitude channel:
# |a| = sqrt(ax^2 + ay^2 + az^2), keeping a trailing channel dimension
# so the result stays 3-D: (samples, janela, 1).
train_x_module = np.sqrt(sum(train_xAcc[:, :, c] ** 2 for c in range(3)))
print(train_x_module.shape)
train_x_module = train_x_module[:, :, np.newaxis]
val_x_module = np.sqrt(sum(val_xAcc[:, :, c] ** 2 for c in range(3)))
val_x_module = val_x_module[:, :, np.newaxis]
print(train_x_module.shape, val_x_module.shape)
(256, 400)
(256, 400, 1) (128, 400, 1)
In [15]:
# Small fully-connected classifier over the FFT buckets of the magnitude
# signal. Built from a (units, dropout-rate) spec to avoid repetition.
model = tf.keras.Sequential()
for unidades, taxa in ((16, 0.1), (16, 0.1), (64, 0.05)):
    model.add(Dense(unidades, activation='relu'))
    model.add(Dropout(taxa))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
In [16]:
# Convert the magnitude windows into bucketed FFT features (project helper;
# bucketing scheme defined in ipynb.fs.full.functions — TODO confirm).
fft_train_module = converte_dados_buckets_fft(train_x_module)
fft_test_module = converte_dados_buckets_fft(val_x_module)
In [17]:
model.fit(fft_train_module, y_train, epochs=50, verbose=True, shuffle=True, batch_size=1024, validation_data=(fft_test_module, y_val))
Train on 256 samples, validate on 128 samples
Epoch 1/50
256/256 [==============================] - 0s 2ms/sample - loss: 0.7125 - accuracy: 0.5039 - val_loss: 0.6960 - val_accuracy: 0.4844
Epoch 2/50
256/256 [==============================] - 0s 59us/sample - loss: 0.6927 - accuracy: 0.5469 - val_loss: 0.6923 - val_accuracy: 0.4922
Epoch 3/50
256/256 [==============================] - 0s 55us/sample - loss: 0.7124 - accuracy: 0.4453 - val_loss: 0.6909 - val_accuracy: 0.5469
Epoch 4/50
256/256 [==============================] - 0s 55us/sample - loss: 0.6941 - accuracy: 0.5234 - val_loss: 0.6912 - val_accuracy: 0.5078
Epoch 5/50
256/256 [==============================] - 0s 59us/sample - loss: 0.7114 - accuracy: 0.4336 - val_loss: 0.6923 - val_accuracy: 0.5000
Epoch 6/50
256/256 [==============================] - 0s 50us/sample - loss: 0.6950 - accuracy: 0.5156 - val_loss: 0.6935 - val_accuracy: 0.5000
Epoch 7/50
256/256 [==============================] - 0s 62us/sample - loss: 0.6949 - accuracy: 0.5156 - val_loss: 0.6944 - val_accuracy: 0.5000
Epoch 8/50
256/256 [==============================] - 0s 58us/sample - loss: 0.7061 - accuracy: 0.4883 - val_loss: 0.6945 - val_accuracy: 0.5000
Epoch 9/50
256/256 [==============================] - 0s 61us/sample - loss: 0.6983 - accuracy: 0.5000 - val_loss: 0.6940 - val_accuracy: 0.5000
Epoch 10/50
256/256 [==============================] - 0s 57us/sample - loss: 0.7063 - accuracy: 0.4922 - val_loss: 0.6932 - val_accuracy: 0.5000
Epoch 11/50
256/256 [==============================] - 0s 60us/sample - loss: 0.6945 - accuracy: 0.5234 - val_loss: 0.6922 - val_accuracy: 0.5000
Epoch 12/50
256/256 [==============================] - 0s 56us/sample - loss: 0.6967 - accuracy: 0.4805 - val_loss: 0.6911 - val_accuracy: 0.5000
Epoch 13/50
256/256 [==============================] - 0s 54us/sample - loss: 0.6963 - accuracy: 0.5000 - val_loss: 0.6902 - val_accuracy: 0.5000
Epoch 14/50
256/256 [==============================] - 0s 57us/sample - loss: 0.6937 - accuracy: 0.5078 - val_loss: 0.6895 - val_accuracy: 0.5078
Epoch 15/50
256/256 [==============================] - 0s 52us/sample - loss: 0.7040 - accuracy: 0.4727 - val_loss: 0.6890 - val_accuracy: 0.5156
Epoch 16/50
256/256 [==============================] - 0s 49us/sample - loss: 0.6950 - accuracy: 0.5000 - val_loss: 0.6887 - val_accuracy: 0.5312
Epoch 17/50
256/256 [==============================] - 0s 63us/sample - loss: 0.6842 - accuracy: 0.5703 - val_loss: 0.6885 - val_accuracy: 0.5312
Epoch 18/50
256/256 [==============================] - 0s 54us/sample - loss: 0.6949 - accuracy: 0.5039 - val_loss: 0.6884 - val_accuracy: 0.5391
Epoch 19/50
256/256 [==============================] - 0s 60us/sample - loss: 0.6904 - accuracy: 0.5312 - val_loss: 0.6883 - val_accuracy: 0.5391
Epoch 20/50
256/256 [==============================] - 0s 51us/sample - loss: 0.6918 - accuracy: 0.5820 - val_loss: 0.6881 - val_accuracy: 0.5469
Epoch 21/50
256/256 [==============================] - 0s 51us/sample - loss: 0.6923 - accuracy: 0.5352 - val_loss: 0.6880 - val_accuracy: 0.5547
Epoch 22/50
256/256 [==============================] - 0s 61us/sample - loss: 0.6892 - accuracy: 0.5469 - val_loss: 0.6879 - val_accuracy: 0.5469
Epoch 23/50
256/256 [==============================] - 0s 53us/sample - loss: 0.6955 - accuracy: 0.5352 - val_loss: 0.6877 - val_accuracy: 0.5391
Epoch 24/50
256/256 [==============================] - 0s 62us/sample - loss: 0.6958 - accuracy: 0.5195 - val_loss: 0.6875 - val_accuracy: 0.5469
Epoch 25/50
256/256 [==============================] - 0s 61us/sample - loss: 0.6936 - accuracy: 0.5117 - val_loss: 0.6872 - val_accuracy: 0.5469
Epoch 26/50
256/256 [==============================] - 0s 61us/sample - loss: 0.6811 - accuracy: 0.5469 - val_loss: 0.6871 - val_accuracy: 0.5391
Epoch 27/50
256/256 [==============================] - 0s 51us/sample - loss: 0.6943 - accuracy: 0.5352 - val_loss: 0.6869 - val_accuracy: 0.5391
Epoch 28/50
256/256 [==============================] - 0s 60us/sample - loss: 0.6980 - accuracy: 0.4727 - val_loss: 0.6868 - val_accuracy: 0.5391
Epoch 29/50
256/256 [==============================] - 0s 61us/sample - loss: 0.6910 - accuracy: 0.5117 - val_loss: 0.6867 - val_accuracy: 0.5391
Epoch 30/50
256/256 [==============================] - 0s 60us/sample - loss: 0.6927 - accuracy: 0.5078 - val_loss: 0.6865 - val_accuracy: 0.5234
Epoch 31/50
256/256 [==============================] - 0s 56us/sample - loss: 0.6986 - accuracy: 0.4609 - val_loss: 0.6864 - val_accuracy: 0.5234
Epoch 32/50
256/256 [==============================] - 0s 52us/sample - loss: 0.6843 - accuracy: 0.5234 - val_loss: 0.6862 - val_accuracy: 0.5312
Epoch 33/50
256/256 [==============================] - 0s 54us/sample - loss: 0.6939 - accuracy: 0.5078 - val_loss: 0.6861 - val_accuracy: 0.5312
Epoch 34/50
256/256 [==============================] - 0s 55us/sample - loss: 0.6893 - accuracy: 0.5469 - val_loss: 0.6859 - val_accuracy: 0.5156
Epoch 35/50
256/256 [==============================] - 0s 59us/sample - loss: 0.6931 - accuracy: 0.5000 - val_loss: 0.6857 - val_accuracy: 0.5156
Epoch 36/50
256/256 [==============================] - 0s 55us/sample - loss: 0.6869 - accuracy: 0.5547 - val_loss: 0.6855 - val_accuracy: 0.5234
Epoch 37/50
256/256 [==============================] - 0s 59us/sample - loss: 0.6915 - accuracy: 0.5625 - val_loss: 0.6853 - val_accuracy: 0.5156
Epoch 38/50
256/256 [==============================] - 0s 52us/sample - loss: 0.6833 - accuracy: 0.5312 - val_loss: 0.6851 - val_accuracy: 0.5234
Epoch 39/50
256/256 [==============================] - 0s 54us/sample - loss: 0.6819 - accuracy: 0.5781 - val_loss: 0.6849 - val_accuracy: 0.5312
Epoch 40/50
256/256 [==============================] - 0s 59us/sample - loss: 0.6898 - accuracy: 0.5547 - val_loss: 0.6847 - val_accuracy: 0.5312
Epoch 41/50
256/256 [==============================] - 0s 57us/sample - loss: 0.6903 - accuracy: 0.5352 - val_loss: 0.6845 - val_accuracy: 0.5312
Epoch 42/50
256/256 [==============================] - 0s 56us/sample - loss: 0.6792 - accuracy: 0.5898 - val_loss: 0.6841 - val_accuracy: 0.5312
Epoch 43/50
256/256 [==============================] - 0s 55us/sample - loss: 0.6888 - accuracy: 0.5547 - val_loss: 0.6838 - val_accuracy: 0.5391
Epoch 44/50
256/256 [==============================] - 0s 56us/sample - loss: 0.6869 - accuracy: 0.5391 - val_loss: 0.6835 - val_accuracy: 0.5469
Epoch 45/50
256/256 [==============================] - 0s 50us/sample - loss: 0.6923 - accuracy: 0.5234 - val_loss: 0.6833 - val_accuracy: 0.5703
Epoch 46/50
256/256 [==============================] - 0s 59us/sample - loss: 0.6824 - accuracy: 0.5547 - val_loss: 0.6831 - val_accuracy: 0.5781
Epoch 47/50
256/256 [==============================] - 0s 55us/sample - loss: 0.6888 - accuracy: 0.5195 - val_loss: 0.6829 - val_accuracy: 0.5859
Epoch 48/50
256/256 [==============================] - 0s 57us/sample - loss: 0.6911 - accuracy: 0.5000 - val_loss: 0.6827 - val_accuracy: 0.6016
Epoch 49/50
256/256 [==============================] - 0s 58us/sample - loss: 0.6829 - accuracy: 0.5742 - val_loss: 0.6825 - val_accuracy: 0.6016
Epoch 50/50
256/256 [==============================] - 0s 57us/sample - loss: 0.6883 - accuracy: 0.5391 - val_loss: 0.6822 - val_accuracy: 0.6016
Out[17]:
<tensorflow.python.keras.callbacks.History at 0x7f42a477a8d0>
In [26]:
# NOTE(review): this cell ran as In[26] but uses X_train_concat, which is
# only defined in In[19] below — under Restart & Run All this cell fails.
# Move it after the concatenation cell.
# Plot each window's FFT spectrum between 0.1 and 65 (frequency units as
# returned by calculo_fft — presumably Hz, TODO confirm).
for i in X_train_concat:
    amplitude, frequencia_fft = calculo_fft(i)
    p = np.argwhere(frequencia_fft>65)[0][0]  # first bin above the upper cutoff
    q = np.argwhere(frequencia_fft>0.1)[0][0]  # first bin above the lower cutoff
    plt.plot(frequencia_fft[q:p],amplitude[q:p])
    plt.show()
In [19]:
# Concatenate the three accelerometer axes end-to-end along the time axis
# (samples, 3*janela) — an alternative to stacking them as channels.
X_train_concat = np.concatenate([dados_multiplicados_treino[k][0] for k in range(3)], axis=1)
X_test_concat = np.concatenate([dados_multiplicados_teste[k][0] for k in range(3)], axis=1)
In [20]:
# Same fully-connected architecture as the magnitude-FFT model, here for
# the concatenated-axes FFT features. Built from a (units, dropout) spec.
model_fft = tf.keras.Sequential()
for unidades, taxa in ((16, 0.1), (16, 0.1), (64, 0.05)):
    model_fft.add(Dense(unidades, activation='relu'))
    model_fft.add(Dropout(taxa))
model_fft.add(Dense(1, activation='sigmoid'))
model_fft.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
In [21]:
# Bucketed FFT features of the concatenated-axes windows (project helper).
fft_X_train_concat = converte_dados_buckets_fft(X_train_concat)
fft_X_test_concat = converte_dados_buckets_fft(X_test_concat)
In [22]:
model_fft.fit(fft_X_train_concat, y_train, epochs=50, verbose=True, shuffle=True, batch_size=1024, validation_data=(fft_X_test_concat, y_val))
Train on 256 samples, validate on 128 samples
Epoch 1/50
256/256 [==============================] - 1s 2ms/sample - loss: 1.8412 - accuracy: 0.4844 - val_loss: 1.5357 - val_accuracy: 0.5000
Epoch 2/50
256/256 [==============================] - 0s 56us/sample - loss: 1.6086 - accuracy: 0.4609 - val_loss: 1.3356 - val_accuracy: 0.5000
Epoch 3/50
256/256 [==============================] - 0s 60us/sample - loss: 1.4799 - accuracy: 0.4531 - val_loss: 1.1642 - val_accuracy: 0.5000
Epoch 4/50
256/256 [==============================] - 0s 61us/sample - loss: 1.4562 - accuracy: 0.4453 - val_loss: 1.0319 - val_accuracy: 0.5000
Epoch 5/50
256/256 [==============================] - 0s 52us/sample - loss: 1.1424 - accuracy: 0.4727 - val_loss: 0.9475 - val_accuracy: 0.4531
Epoch 6/50
256/256 [==============================] - 0s 58us/sample - loss: 1.1242 - accuracy: 0.4023 - val_loss: 0.9114 - val_accuracy: 0.2422
Epoch 7/50
256/256 [==============================] - 0s 58us/sample - loss: 0.9947 - accuracy: 0.4023 - val_loss: 0.9143 - val_accuracy: 0.1562
Epoch 8/50
256/256 [==============================] - 0s 56us/sample - loss: 1.1186 - accuracy: 0.3477 - val_loss: 0.9400 - val_accuracy: 0.2734
Epoch 9/50
256/256 [==============================] - 0s 55us/sample - loss: 0.9287 - accuracy: 0.4805 - val_loss: 0.9701 - val_accuracy: 0.4219
Epoch 10/50
256/256 [==============================] - 0s 55us/sample - loss: 1.0368 - accuracy: 0.4453 - val_loss: 0.9894 - val_accuracy: 0.4844
Epoch 11/50
256/256 [==============================] - 0s 60us/sample - loss: 1.0775 - accuracy: 0.4453 - val_loss: 0.9932 - val_accuracy: 0.5000
Epoch 12/50
256/256 [==============================] - 0s 61us/sample - loss: 1.0579 - accuracy: 0.4453 - val_loss: 0.9823 - val_accuracy: 0.5000
Epoch 13/50
256/256 [==============================] - 0s 62us/sample - loss: 1.0423 - accuracy: 0.4414 - val_loss: 0.9588 - val_accuracy: 0.5000
Epoch 14/50
256/256 [==============================] - 0s 55us/sample - loss: 1.0251 - accuracy: 0.4062 - val_loss: 0.9286 - val_accuracy: 0.5000
Epoch 15/50
256/256 [==============================] - 0s 57us/sample - loss: 0.9621 - accuracy: 0.4727 - val_loss: 0.8933 - val_accuracy: 0.5000
Epoch 16/50
256/256 [==============================] - 0s 53us/sample - loss: 0.9410 - accuracy: 0.4609 - val_loss: 0.8556 - val_accuracy: 0.5000
Epoch 17/50
256/256 [==============================] - 0s 59us/sample - loss: 0.9691 - accuracy: 0.4492 - val_loss: 0.8177 - val_accuracy: 0.4922
Epoch 18/50
256/256 [==============================] - 0s 51us/sample - loss: 0.8400 - accuracy: 0.4570 - val_loss: 0.7849 - val_accuracy: 0.4453
Epoch 19/50
256/256 [==============================] - 0s 60us/sample - loss: 0.8456 - accuracy: 0.4922 - val_loss: 0.7579 - val_accuracy: 0.3594
Epoch 20/50
256/256 [==============================] - 0s 55us/sample - loss: 0.8115 - accuracy: 0.4688 - val_loss: 0.7357 - val_accuracy: 0.3359
Epoch 21/50
256/256 [==============================] - 0s 53us/sample - loss: 0.8193 - accuracy: 0.5156 - val_loss: 0.7191 - val_accuracy: 0.3203
Epoch 22/50
256/256 [==============================] - 0s 61us/sample - loss: 0.7806 - accuracy: 0.5117 - val_loss: 0.7074 - val_accuracy: 0.4922
Epoch 23/50
256/256 [==============================] - 0s 58us/sample - loss: 0.7975 - accuracy: 0.4844 - val_loss: 0.6993 - val_accuracy: 0.5156
Epoch 24/50
256/256 [==============================] - 0s 59us/sample - loss: 0.7722 - accuracy: 0.5391 - val_loss: 0.6929 - val_accuracy: 0.5078
Epoch 25/50
256/256 [==============================] - 0s 52us/sample - loss: 0.8033 - accuracy: 0.5156 - val_loss: 0.6874 - val_accuracy: 0.5078
Epoch 26/50
256/256 [==============================] - 0s 53us/sample - loss: 0.7902 - accuracy: 0.5195 - val_loss: 0.6819 - val_accuracy: 0.5078
Epoch 27/50
256/256 [==============================] - 0s 56us/sample - loss: 0.7909 - accuracy: 0.5312 - val_loss: 0.6751 - val_accuracy: 0.5078
Epoch 28/50
256/256 [==============================] - 0s 54us/sample - loss: 0.7766 - accuracy: 0.5469 - val_loss: 0.6674 - val_accuracy: 0.5078
Epoch 29/50
256/256 [==============================] - 0s 62us/sample - loss: 0.7194 - accuracy: 0.5469 - val_loss: 0.6590 - val_accuracy: 0.5391
Epoch 30/50
256/256 [==============================] - 0s 58us/sample - loss: 0.7292 - accuracy: 0.5781 - val_loss: 0.6497 - val_accuracy: 0.5859
Epoch 31/50
256/256 [==============================] - 0s 58us/sample - loss: 0.7371 - accuracy: 0.5586 - val_loss: 0.6410 - val_accuracy: 0.6953
Epoch 32/50
256/256 [==============================] - 0s 61us/sample - loss: 0.7760 - accuracy: 0.5469 - val_loss: 0.6336 - val_accuracy: 0.7500
Epoch 33/50
256/256 [==============================] - 0s 63us/sample - loss: 0.7389 - accuracy: 0.5430 - val_loss: 0.6280 - val_accuracy: 0.7891
Epoch 34/50
256/256 [==============================] - 0s 63us/sample - loss: 0.7430 - accuracy: 0.5391 - val_loss: 0.6242 - val_accuracy: 0.7500
Epoch 35/50
256/256 [==============================] - 0s 55us/sample - loss: 0.7036 - accuracy: 0.6250 - val_loss: 0.6211 - val_accuracy: 0.7578
Epoch 36/50
256/256 [==============================] - 0s 61us/sample - loss: 0.7308 - accuracy: 0.5820 - val_loss: 0.6187 - val_accuracy: 0.7266
Epoch 37/50
256/256 [==============================] - 0s 53us/sample - loss: 0.6818 - accuracy: 0.6094 - val_loss: 0.6167 - val_accuracy: 0.7188
Epoch 38/50
256/256 [==============================] - 0s 63us/sample - loss: 0.7017 - accuracy: 0.5898 - val_loss: 0.6147 - val_accuracy: 0.6953
Epoch 39/50
256/256 [==============================] - 0s 60us/sample - loss: 0.6658 - accuracy: 0.6172 - val_loss: 0.6121 - val_accuracy: 0.6875
Epoch 40/50
256/256 [==============================] - 0s 55us/sample - loss: 0.6836 - accuracy: 0.5977 - val_loss: 0.6092 - val_accuracy: 0.6875
Epoch 41/50
256/256 [==============================] - 0s 54us/sample - loss: 0.6813 - accuracy: 0.5977 - val_loss: 0.6060 - val_accuracy: 0.6953
Epoch 42/50
256/256 [==============================] - 0s 51us/sample - loss: 0.7225 - accuracy: 0.5664 - val_loss: 0.6024 - val_accuracy: 0.7031
Epoch 43/50
256/256 [==============================] - 0s 60us/sample - loss: 0.6405 - accuracy: 0.6406 - val_loss: 0.5983 - val_accuracy: 0.7266
Epoch 44/50
256/256 [==============================] - 0s 59us/sample - loss: 0.6995 - accuracy: 0.5938 - val_loss: 0.5938 - val_accuracy: 0.7266
Epoch 45/50
256/256 [==============================] - 0s 52us/sample - loss: 0.6301 - accuracy: 0.6719 - val_loss: 0.5895 - val_accuracy: 0.7500
Epoch 46/50
256/256 [==============================] - 0s 54us/sample - loss: 0.6293 - accuracy: 0.6445 - val_loss: 0.5853 - val_accuracy: 0.7656
Epoch 47/50
256/256 [==============================] - 0s 58us/sample - loss: 0.6427 - accuracy: 0.6680 - val_loss: 0.5814 - val_accuracy: 0.7891
Epoch 48/50
256/256 [==============================] - 0s 61us/sample - loss: 0.6679 - accuracy: 0.6602 - val_loss: 0.5783 - val_accuracy: 0.8047
Epoch 49/50
256/256 [==============================] - 0s 55us/sample - loss: 0.6321 - accuracy: 0.6523 - val_loss: 0.5760 - val_accuracy: 0.8047
Epoch 50/50
256/256 [==============================] - 0s 57us/sample - loss: 0.6277 - accuracy: 0.6602 - val_loss: 0.5740 - val_accuracy: 0.8203
Out[22]:
<tensorflow.python.keras.callbacks.History at 0x7f4280393850>
In [ ]: